type runtime.g

200 uses
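
g is the runtime's per-goroutine descriptor (declared at runtime2.go#L394 below). Every goroutine is backed by one g, which records its stack bounds, scheduling status, and the links used by the scheduler, garbage collector, signal handling, and tracer; the entries below index every use of the type inside the runtime package. A runnable sketch of how user code can observe these descriptors indirectly follows the listing.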

	runtime (current package)
		cgo_sigaction.go#L45: 		var g *g
		cgocall.go#L404: 		defer func(gp *g) {
		chan.go#L748: func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool {
		coro.go#L109: func coroswitch_m(gp *g) {
		coro.go#L180: 	var gnext *g
		debugcall.go#L164: 	mcall(func(gp *g) {
		debugcall.go#L206: 	callingG *g
		debugcall.go#L222: 	mcall(func(gp *g) {
		heapdump.go#L341: func dumpgoroutine(gp *g) {
		heapdump.go#L412: 	forEachG(func(gp *g) {
		lock_futex.go#L129: func beforeIdle(int64, int64) (*g, bool) {
		mfinal.go#L48: 	fing        *g        // goroutine that runs finalizers
		mfinal.go#L162: func wakefing() *g {
		mfinal.go#L176: func finalizercommit(gp *g, lock unsafe.Pointer) bool {
		mgc.go#L387: 	stackRoots []*g
		mgc.go#L1463: 		gopark(func(g *g, nodep unsafe.Pointer) bool {
		mgc.go#L1783: 	forEachG(func(gp *g) {
		mgcmark.go#L133: 	forEachGRace(func(gp *g) {
		mgcmark.go#L312: 	var tail *g
		mgcmark.go#L441: func gcAssistAlloc(gp *g) {
		mgcmark.go#L653: func gcAssistAlloc1(gp *g, scanWork int64) {
		mgcmark.go#L856: func scanstack(gp *g, gcw *gcWork) int64 {
		mgcpacer.go#L754: func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
		mgcscavenge.go#L282: 	g *g
		mgcsweep.go#L37: 	g      *g
		mprof.go#L1375: 	forEachGRace(func(gp1 *g) {
		mprof.go#L1388: 	forEachGRace(func(gp1 *g) {
		mprof.go#L1416: func tryRecordGoroutineProfileWB(gp1 *g) {
		mprof.go#L1426: func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
		mprof.go#L1470: func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
		mprof.go#L1514: 	isOK := func(gp1 *g) bool {
		mprof.go#L1525: 	forEachGRace(func(gp1 *g) {
		mprof.go#L1550: 		forEachGRace(func(gp1 *g) {
		mprof.go#L1604: func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
		netpoll.go#L434: 	var rg, wg *g
		netpoll.go#L460: 	var rg, wg *g
		netpoll.go#L496: 	var rg, wg *g
		netpoll.go#L529: func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
		netpoll.go#L540: func netpollgoready(gp *g, traceskip int) {
		netpoll.go#L591: func netpollunblock(pd *pollDesc, mode int32, ioready bool, delta *int32) *g {
		netpoll.go#L617: 			return (*g)(unsafe.Pointer(old))
		netpoll.go#L636: 	var rg *g
		netpoll.go#L645: 	var wg *g
		panic.go#L530: func popDefer(gp *g) {
		panic.go#L1138: func recovery(gp *g) {
		panic.go#L1408: func dopanic_m(gp *g, pc, sp uintptr, bubble *synctestBubble) bool {
		panic.go#L1439: 				tracebacksomeothers(gp, func(other *g) bool {
		panic.go#L1500: func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
		preempt.go#L62: 	g *g
		preempt.go#L105: func suspendG(gp *g) suspendGState {
		preempt.go#L342: func wantAsyncPreempt(gp *g) bool {
		preempt.go#L363: func isAsyncSafePoint(gp *g, pc, sp, lr uintptr) (bool, uintptr) {
		proc.go#L119: 	g0           g
		proc.go#L443: func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
		proc.go#L479: func goready(gp *g, traceskip int) {
		proc.go#L574: func badmcall(fn func(*g)) {
		proc.go#L578: func badmcall2(fn func(*g)) {
		proc.go#L618: var gcrash g
		proc.go#L620: var crashingG atomic.Pointer[g]
		proc.go#L668: 	allgs    []*g
		proc.go#L683: 	allgptr **g
		proc.go#L686: func allgadd(gp *g) {
		proc.go#L703: func allGsSnapshot() []*g {
		proc.go#L715: func atomicAllG() (**g, uintptr) {
		proc.go#L717: 	ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
		proc.go#L722: func atomicAllGIndex(ptr **g, i uintptr) *g {
		proc.go#L723: 	return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
		proc.go#L729: func forEachG(fn func(gp *g)) {
		proc.go#L741: func forEachGRace(fn func(gp *g)) {
		proc.go#L955: func dumpgstatus(gp *g) {
		proc.go#L1117: func ready(gp *g, traceskip int, next bool) {
		proc.go#L1204: func readgstatus(gp *g) uint32 {
		proc.go#L1212: func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
		proc.go#L1240: func castogscanstatus(gp *g, oldval, newval uint32) bool {
		proc.go#L1270: func casgstatus(gp *g, oldval, newval uint32) {
		proc.go#L1380: func casGToWaiting(gp *g, old uint32, reason waitReason) {
		proc.go#L1390: func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
		proc.go#L1401: func casGToPreemptScan(gp *g, old, new uint32) {
		proc.go#L1419: func casGFromPreempted(gp *g, old, new uint32) bool {
		proc.go#L3283: func startlockedm(gp *g) {
		proc.go#L3336: func execute(gp *g, inheritTime bool) {
		proc.go#L3377: func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
		proc.go#L3816: func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
		proc.go#L3929: func checkIdleGCNoP() (*p, *g) {
		proc.go#L4040: 	var tail *g
		proc.go#L4223: func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
		proc.go#L4229: func park_m(gp *g) {
		proc.go#L4283: func goschedImpl(gp *g, preempted bool) {
		proc.go#L4318: func gosched_m(gp *g) {
		proc.go#L4323: func goschedguarded_m(gp *g) {
		proc.go#L4330: func gopreempt_m(gp *g) {
		proc.go#L4337: func preemptPark(gp *g) {
		proc.go#L4412: func goyield_m(gp *g) {
		proc.go#L4447: func goexit0(gp *g) {
		proc.go#L4452: func gdestroy(gp *g) {
		proc.go#L4965: func exitsyscall0(gp *g) {
		proc.go#L5136: func malg(stacksize int32) *g {
		proc.go#L5137: 	newg := new(g)
		proc.go#L5176: func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
		proc.go#L5287: func saveAncestors(callergp *g) *[]ancestorInfo {
		proc.go#L5320: func gfput(pp *p, gp *g) {
		proc.go#L5362: func gfget(pp *p) *g {
		proc.go#L5592: func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
		proc.go#L6145: 	forEachG(func(gp *g) {
		proc.go#L6602: 	forEachG(func(gp *g) {
		proc.go#L6622: 	g    *g
		proc.go#L6811: func schedEnabled(gp *g) bool {
		proc.go#L6855: func globrunqput(gp *g) {
		proc.go#L6866: func globrunqputhead(gp *g) {
		proc.go#L6887: func globrunqget() *g {
		proc.go#L6899: func globrunqgetbatch(n int32) (gp *g, q gQueue) {
		proc.go#L7058: func runqput(pp *p, gp *g, next bool) {
		proc.go#L7104: func runqputslow(pp *p, gp *g, h, t uint32) bool {
		proc.go#L7105: 	var batch [len(pp.runq)/2 + 1]*g
		proc.go#L7178: func runqget(pp *p) (gp *g, inheritTime bool) {
		proc.go#L7297: func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
		proc.go#L7330: func (q *gQueue) push(gp *g) {
		proc.go#L7340: func (q *gQueue) pushBack(gp *g) {
		proc.go#L7369: func (q *gQueue) pop() *g {
		proc.go#L7401: func (l *gList) push(gp *g) {
		proc.go#L7417: func (l *gList) pop() *g {
		race0.go#L31: func raceacquireg(gp *g, addr unsafe.Pointer)                               { throw("race") }
		race0.go#L34: func racereleaseg(gp *g, addr unsafe.Pointer)                               { throw("race") }
		race0.go#L36: func racereleaseacquireg(gp *g, addr unsafe.Pointer)                        { throw("race") }
		race0.go#L38: func racereleasemergeg(gp *g, addr unsafe.Pointer)                          { throw("race") }
		runtime2.go#L239: func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
		runtime2.go#L242: func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }
		runtime2.go#L250: func (gp *g) guintptr() guintptr {
		runtime2.go#L259: func setGNoWB(gp **g, new *g) {
		runtime2.go#L333: 	g *g
		runtime2.go#L394: type g struct {
		runtime2.go#L533: 	g0      *g     // goroutine with scheduling stack
		runtime2.go#L539: 	gsignal         *g                // signal-handling g
		runtime2.go#L544: 	curg            *g       // current running goroutine
		runtime2.go#L587: 	waitunlockf          func(*g, unsafe.Pointer) bool
		runtime2.go#L974: 	g    *g
		select.go#L63: func selparkcommit(gp *g, _ unsafe.Pointer) bool {
		signal_amd64.go#L49: func (c *sigctxt) preparePanic(sig uint32, gp *g) {
		signal_unix.go#L341: func doSigPreempt(gp *g, ctxt *sigctxt) {
		signal_unix.go#L398: func sigFetchG(c *sigctxt) *g {
		signal_unix.go#L411: 				gp := *(**g)(unsafe.Pointer(s.base()))
		signal_unix.go#L613: var testSigtrap func(info *siginfo, ctxt *sigctxt, gp *g) bool
		signal_unix.go#L614: var testSigusr1 func(gp *g) bool
		signal_unix.go#L640: func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
		signal_unix.go#L839: func fatalsignal(sig uint32, c *sigctxt, gp *g, mp *m) *g {
		stack.go#L776: func adjustctxt(gp *g, adjinfo *adjustinfo) {
		stack.go#L802: func adjustdefers(gp *g, adjinfo *adjustinfo) {
		stack.go#L814: func adjustpanics(gp *g, adjinfo *adjustinfo) {
		stack.go#L820: func adjustsudogs(gp *g, adjinfo *adjustinfo) {
		stack.go#L834: func findsghi(gp *g, stk stack) uintptr {
		stack.go#L848: func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
		stack.go#L899: func copystack(gp *g, newsize uintptr) {
		stack.go#L1197: func isShrinkStackSafe(gp *g) bool {
		stack.go#L1235: func shrinkstack(gp *g) {
		stubs.go#L31: func getg() *g
		stubs.go#L47: func mcall(fn func(*g))
		stubs.go#L217: func setg(gg *g)
		synctest.go#L19: 	root    *g     // caller of synctest.Run
		synctest.go#L20: 	waiter  *g     // caller of synctest.Wait
		synctest.go#L21: 	main    *g     // goroutine started by synctest.Run
		synctest.go#L43: func (bubble *synctestBubble) changegstatus(gp *g, oldval, newval uint32) {
		synctest.go#L132: func (bubble *synctestBubble) maybeWakeLocked() *g {
		synctest.go#L269: func synctestidle_c(gp *g, _ unsafe.Pointer) bool {
		synctest.go#L316: func synctestwait_c(gp *g, _ unsafe.Pointer) bool {
		time.go#L371: func resetForSleep(gp *g, _ unsafe.Pointer) bool {
		time.go#L442: 	goready(arg.(*g), 0)
		trace.go#L94: 	reader atomic.Pointer[g] // goroutine that called ReadTrace, or nil
		trace.go#L352: 		gp           *g
		trace.go#L361: 	forEachGRace(func(gp *g) {
		trace.go#L781: 		gopark(func(gp *g, _ unsafe.Pointer) bool {
		trace.go#L947: func traceReader() *g {
		trace.go#L958: func traceReaderAvailable() *g {
		traceallocfree.go#L79: 	forEachGRace(func(gp *g) {
		traceback.go#L122: func (u *unwinder) init(gp *g, flags unwindFlags) {
		traceback.go#L132: func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) {
		traceback.go#L774: func printcreatedby(gp *g) {
		traceback.go#L802: func traceback(pc, sp, lr uintptr, gp *g) {
		traceback.go#L814: func tracebacktrap(pc, sp, lr uintptr, gp *g) {
		traceback.go#L823: func traceback1(pc, sp, lr uintptr, gp *g, flags unwindFlags) {
		traceback.go#L1105: func gcallers(gp *g, skip int, pcbuf []uintptr) int {
		traceback.go#L1113: func showframe(sf srcFunc, gp *g, firstFrame bool, calleeID abi.FuncID) bool {
		traceback.go#L1212: func goroutineheader(gp *g) {
		traceback.go#L1270: func tracebackothers(me *g) {
		traceback.go#L1271: 	tracebacksomeothers(me, func(*g) bool { return true })
		traceback.go#L1274: func tracebacksomeothers(me *g, showf func(*g) bool) {
		traceback.go#L1292: 	forEachGRace(func(gp *g) {
		traceback.go#L1367: func isSystemGoroutine(gp *g, fixed bool) bool {
		tracecpu.go#L213: func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
		traceruntime.go#L419: func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
		traceruntime.go#L472: func (tl traceLocker) GoUnpark(gp *g, skip int) {
		traceruntime.go#L480: func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
		traceruntime.go#L493: func (tl traceLocker) emitUnblockStatus(gp *g, gen uintptr) {
		traceruntime.go#L592: func (tl traceLocker) GoCreateSyscall(gp *g) {
		tracestack.go#L32: func traceStack(skip int, gp *g, tab *traceStackTable) uint64 {
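
Because g is unexported, user code never handles a *g directly; the public runtime API reaches them internally (runtime.Stack with all=true walks every goroutine via tracebackothers/forEachGRace, per the traceback.go and mprof.go entries above). Below is a minimal runnable sketch of that indirect view; the extra goroutine and the buffer size are illustrative choices, not anything prescribed by the listing:

	package main

	import (
		"fmt"
		"os"
		"runtime"
	)

	func main() {
		done := make(chan struct{})
		go func() { <-done }() // a second goroutine so the dump shows more than main

		// runtime.Stack(buf, true) stops the world and writes a traceback of
		// every live goroutine, i.e. of every *g on the runtime's allgs list
		// (proc.go#L668 above).
		buf := make([]byte, 1<<16)
		n := runtime.Stack(buf, true)
		os.Stdout.Write(buf[:n])

		fmt.Println("live goroutines:", runtime.NumGoroutine())
		close(done)
	}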